[IA64] Allow guest to set the address of shared_info.
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 23 Jun 2006 15:46:39 +0000 (09:46 -0600)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Fri, 23 Jun 2006 15:46:39 +0000 (09:46 -0600)
Add a new hypercall: SET_SHARED_INFO_VA.
Cleanup of asm-xsi-offsets: do not define absolute address, use a new macro.
Cleanup of linux asm-offsets: use a macro for xen mapped regs.
xensetup.S: set the shared_info address (disabled if using compatibility).
privop.h: May redefine XSI_BASE (not yet enabled for compatibility).
Vocabulary coherence: use XMAPPEDREGS_ prefix.
Cleanup of xensystem.h

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
17 files changed:
linux-2.6-xen-sparse/arch/ia64/kernel/asm-offsets.c
linux-2.6-xen-sparse/arch/ia64/xen/xensetup.S
linux-2.6-xen-sparse/include/asm-ia64/xen/privop.h
xen/arch/ia64/asm-xsi-offsets.c
xen/arch/ia64/vmx/vmx_entry.S
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/faults.c
xen/arch/ia64/xen/hypercall.c
xen/arch/ia64/xen/hyperprivop.S
xen/arch/ia64/xen/ivt.S
xen/arch/ia64/xen/vcpu.c
xen/arch/ia64/xen/xenasm.S
xen/include/asm-ia64/dom_fw.h
xen/include/asm-ia64/domain.h
xen/include/asm-ia64/xenkregs.h
xen/include/asm-ia64/xensystem.h
xen/include/public/arch-ia64.h

index f018e7e8c9613e108ab8f568624c0abc5765a56d..3be81cc8d1daf165f5bcf4af316aa0b2d9f54a07 100644 (file)
@@ -265,34 +265,25 @@ void foo(void)
 #ifdef CONFIG_XEN
        BLANK();
 
-       DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
-       DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
-       DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
-       DEFINE(XSI_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifs)));
-       DEFINE(XSI_PRECOVER_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, precover_ifs)));
-       DEFINE(XSI_ISR_OFS, (XSI_OFS + offsetof(mapped_regs_t, isr)));
-       DEFINE(XSI_IFA_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifa)));
-       DEFINE(XSI_IIPA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iipa)));
-       DEFINE(XSI_IIM_OFS, (XSI_OFS + offsetof(mapped_regs_t, iim)));
-       DEFINE(XSI_TPR_OFS, (XSI_OFS + offsetof(mapped_regs_t, tpr)));
-       DEFINE(XSI_IHA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iha)));
-       DEFINE(XSI_ITIR_OFS, (XSI_OFS + offsetof(mapped_regs_t, itir)));
-       DEFINE(XSI_ITV_OFS, (XSI_OFS + offsetof(mapped_regs_t, itv)));
-       DEFINE(XSI_PTA_OFS, (XSI_OFS + offsetof(mapped_regs_t, pta)));
-       DEFINE(XSI_PSR_IC_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_collection_enabled)));
-       DEFINE(XSI_PEND_OFS, (XSI_OFS + offsetof(mapped_regs_t, pending_interruption)));
-       DEFINE(XSI_INCOMPL_REGFR_OFS, (XSI_OFS + offsetof(mapped_regs_t, incomplete_regframe)));
-       DEFINE(XSI_METAPHYS_OFS, (XSI_OFS + offsetof(mapped_regs_t, metaphysical_mode)));
-
-       DEFINE(XSI_BANKNUM_OFS, (XSI_OFS + offsetof(mapped_regs_t, banknum)));
-
-       DEFINE(XSI_BANK0_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank0_regs[0])));
-       DEFINE(XSI_BANK1_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank1_regs[0])));
-       DEFINE(XSI_B0NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vbnat)));
-       DEFINE(XSI_B1NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vnat)));
-       DEFINE(XSI_RR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, rrs[0])));
-       DEFINE(XSI_KR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, krs[0])));
-       DEFINE(XSI_PKR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, pkrs[0])));
-       DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+       DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
+
+       DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+       DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+       DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+       DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+       DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+       DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+       DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+       DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+       DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+       DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+       DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+       DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+       DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
+       DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
+       DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+       DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+       DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
 #endif /* CONFIG_XEN */
 }
index 7c47280aedc016fa48a5f177c52feeeb15597691..ebcab830d33b3b2a1edbd4c7867b161da0d3eeea 100644 (file)
@@ -15,10 +15,21 @@ GLOBAL_ENTRY(early_xen_setup)
        mov r8=ar.rsc           // Initialized in head.S
 (isBP) movl r9=running_on_xen;;
        extr.u r8=r8,2,2;;      // Extract pl fields
-       cmp.ne p7,p0=r8,r0;;    // p7: running on xen 
-(p7)   mov r8=1                // booleanize.
-(p7)   movl r10=xen_ivt;;
+       cmp.eq p7,p0=r8,r0      // p7: !running on xen
+       mov r8=1                // booleanize.
+(p7)   br.ret.sptk.many rp;;
 (isBP) st4 [r9]=r8
-(p7)   mov cr.iva=r10
-       br.ret.sptk.many rp;;
+       movl r10=xen_ivt;;
+       
+       mov cr.iva=r10
+
+#if XSI_BASE != 0xf100000000000000UL
+       /* Backward compatibility.  */
+(isBP) mov r2=0x600
+(isBP) movl r28=XSI_BASE;;
+(isBP) break 0x1000;;
+#endif
+
+       br.ret.sptk.many rp
+       ;;
 END(early_xen_setup)
index 6d4c747c7a643c62202bc9dd87a988277f6c3419..a9e4cd83eaef38a37a0294472f0683583de3caf7 100644 (file)
 
 #define IA64_PARAVIRTUALIZED
 
-#define XSI_OFS                XSI_SIZE
-#define XPRIVREG_BASE  (XSI_BASE + XSI_SIZE)
+#if 0
+#undef XSI_BASE
+/* At 1 MB, before per-cpu space but still addressable using addl instead
+   of movl. */
+#define XSI_BASE                               0xfffffffffff00000
+#endif
+
+/* Address of mapped regs.  */
+#define XMAPPEDREGS_BASE               (XSI_BASE + XSI_SIZE)
 
 #ifdef __ASSEMBLY__
 #define        XEN_HYPER_RFI                   break HYPERPRIVOP_RFI
@@ -98,16 +105,16 @@ extern void xen_set_eflag(unsigned long);  /* see xen_ia64_setreg */
  * Others, like "pend", are abstractions based on privileged registers.
  * "Pend" is guaranteed to be set if reading cr.ivr would return a
  * (non-spurious) interrupt. */
-#define XEN_PRIVREGS ((struct mapped_regs *)XPRIVREG_BASE)
+#define XEN_MAPPEDREGS ((struct mapped_regs *)XMAPPEDREGS_BASE)
 #define XSI_PSR_I                      \
-       (*XEN_PRIVREGS->interrupt_mask_addr)
+       (*XEN_MAPPEDREGS->interrupt_mask_addr)
 #define xen_get_virtual_psr_i()                \
        (!XSI_PSR_I)
 #define xen_set_virtual_psr_i(_val)    \
        ({ XSI_PSR_I = (uint8_t)(_val) ? 0 : 1; })
 #define xen_set_virtual_psr_ic(_val)   \
-       ({ XEN_PRIVREGS->interrupt_collection_enabled = _val ? 1 : 0; })
-#define xen_get_virtual_pend()         (XEN_PRIVREGS->pending_interruption)
+       ({ XEN_MAPPEDREGS->interrupt_collection_enabled = _val ? 1 : 0; })
+#define xen_get_virtual_pend()         (XEN_MAPPEDREGS->pending_interruption)
 
 /* Hyperprivops are "break" instructions with a well-defined API.
  * In particular, the virtual psr.ic bit must be off; in this way
index 1ea734da59bfd98398385d6a24e0bbdad120a5c2..2e438420b090715c906791106318bc58d6f20a9e 100755 (executable)
 
 #define BLANK() asm volatile("\n->" : : )
 
-#define OFFSET(_sym, _str, _mem) \
-    DEFINE(_sym, offsetof(_str, _mem));
+#define DEFINE_MAPPED_REG_OFS(sym, field) \
+       DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
 
 void foo(void)
 {
-       /* First is shared info page, and then arch specific vcpu context */
-       //DEFINE(XSI_BASE, SHAREDINFO_ADDR);
-
-       DEFINE(XSI_PSR_I_ADDR_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_mask_addr)));
-       DEFINE(XSI_PSR_I_ADDR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_mask_addr)));
-       DEFINE(XSI_IPSR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ipsr)));
-       DEFINE(XSI_IPSR_OFS, (XSI_OFS + offsetof(mapped_regs_t, ipsr)));
-       DEFINE(XSI_IIP_OFS, (XSI_OFS + offsetof(mapped_regs_t, iip)));
-       DEFINE(XSI_IIP, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iip)));
-       DEFINE(XSI_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifs)));
-       DEFINE(XSI_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifs)));
-       DEFINE(XSI_PRECOVER_IFS_OFS, (XSI_OFS + offsetof(mapped_regs_t, precover_ifs)));
-       DEFINE(XSI_PRECOVER_IFS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, precover_ifs)));
-       DEFINE(XSI_ISR_OFS, (XSI_OFS + offsetof(mapped_regs_t, isr)));
-       DEFINE(XSI_ISR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, isr)));
-       DEFINE(XSI_IFA_OFS, (XSI_OFS + offsetof(mapped_regs_t, ifa)));
-       DEFINE(XSI_IFA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, ifa)));
-       DEFINE(XSI_IIPA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iipa)));
-       DEFINE(XSI_IIPA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iipa)));
-       DEFINE(XSI_IIM_OFS, (XSI_OFS + offsetof(mapped_regs_t, iim)));
-       DEFINE(XSI_IIM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iim)));
-       DEFINE(XSI_TPR_OFS, (XSI_OFS + offsetof(mapped_regs_t, tpr)));
-       DEFINE(XSI_TPR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tpr)));
-       DEFINE(XSI_IHA_OFS, (XSI_OFS + offsetof(mapped_regs_t, iha)));
-       DEFINE(XSI_IHA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, iha)));
-       DEFINE(XSI_ITIR_OFS, (XSI_OFS + offsetof(mapped_regs_t, itir)));
-       DEFINE(XSI_ITIR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itir)));
-       DEFINE(XSI_ITV_OFS, (XSI_OFS + offsetof(mapped_regs_t, itv)));
-       DEFINE(XSI_ITV, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, itv)));
-       DEFINE(XSI_PTA_OFS, (XSI_OFS + offsetof(mapped_regs_t, pta)));
-       DEFINE(XSI_PTA, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pta)));
-       DEFINE(XSI_PSR_IC_OFS, (XSI_OFS + offsetof(mapped_regs_t, interrupt_collection_enabled)));
-       DEFINE(XSI_PSR_IC, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, interrupt_collection_enabled)));
-       DEFINE(XSI_PEND_OFS, (XSI_OFS + offsetof(mapped_regs_t, pending_interruption)));
-       DEFINE(XSI_PEND, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pending_interruption)));
-       DEFINE(XSI_INCOMPL_REGFR_OFS, (XSI_OFS + offsetof(mapped_regs_t, incomplete_regframe)));
-       DEFINE(XSI_INCOMPL_REGFR, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, incomplete_regframe)));
-       DEFINE(XSI_METAPHYS_OFS, (XSI_OFS + offsetof(mapped_regs_t, metaphysical_mode)));
-       DEFINE(XSI_METAPHYS, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, metaphysical_mode)));
-
-       DEFINE(XSI_BANKNUM_OFS, (XSI_OFS + offsetof(mapped_regs_t, banknum)));
-       DEFINE(XSI_BANKNUM, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, banknum)));
-
-       DEFINE(XSI_BANK0_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank0_regs[0])));
-       DEFINE(XSI_BANK0_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank0_regs[0])));
-       DEFINE(XSI_BANK1_R16_OFS, (XSI_OFS + offsetof(mapped_regs_t, bank1_regs[0])));
-       DEFINE(XSI_BANK1_R16, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, bank1_regs[0])));
-       DEFINE(XSI_B0NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vbnat)));
-       DEFINE(XSI_B1NATS_OFS, (XSI_OFS + offsetof(mapped_regs_t, vnat)));
-       DEFINE(XSI_RR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, rrs[0])));
-       DEFINE(XSI_RR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, rrs[0])));
-       DEFINE(XSI_KR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, krs[0])));
-       DEFINE(XSI_KR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, krs[0])));
-       DEFINE(XSI_PKR0_OFS, (XSI_OFS + offsetof(mapped_regs_t, pkrs[0])));
-       DEFINE(XSI_PKR0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, pkrs[0])));
-       DEFINE(XSI_TMP0_OFS, (XSI_OFS + offsetof(mapped_regs_t, tmp[0])));
-       DEFINE(XSI_TMP0, (SHARED_ARCHINFO_ADDR+offsetof(mapped_regs_t, tmp[0])));
+       DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
+       DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
+       DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
+       DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
+       DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
+       DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
+       DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
+       DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
+       DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
+       DEFINE_MAPPED_REG_OFS(XSI_TPR_OFS, tpr);
+       DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
+       DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
+       DEFINE_MAPPED_REG_OFS(XSI_ITV_OFS, itv);
+       DEFINE_MAPPED_REG_OFS(XSI_PTA_OFS, pta);
+       DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
+       DEFINE_MAPPED_REG_OFS(XSI_PEND_OFS, pending_interruption);
+       DEFINE_MAPPED_REG_OFS(XSI_INCOMPL_REGFR_OFS, incomplete_regframe);
+       DEFINE_MAPPED_REG_OFS(XSI_METAPHYS_OFS, metaphysical_mode);
+       DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
+       DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
+       DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
+       DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
+       DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
+       DEFINE_MAPPED_REG_OFS(XSI_RR0_OFS, rrs[0]);
+       DEFINE_MAPPED_REG_OFS(XSI_KR0_OFS, krs[0]);
 }
index 51b5f2cb8519c6c027933ef13d73b5cbea5f7016..fed208b21b59c44c019bf3483956b3380ed61940 100644 (file)
@@ -675,39 +675,6 @@ GLOBAL_ENTRY(vmx_switch_rr7)
    itr.d dtr[r24]=loc2     // wire in new mapping...
    ;;
 
-
-#if    0
-   // re-pin mappings for shared_info
-
-   mov r24=IA64_TR_SHARED_INFO
-   movl r25=__pgprot(__DIRTY_BITS | _PAGE_PL_2 | _PAGE_AR_RW)
-   ;;
-   or loc3 = r25,loc3          // construct PA | page properties
-   mov r23 = PAGE_SHIFT<<2
-   ;;
-   ptr.d   in1,r23
-   ;;
-   mov cr.itir=r23
-   mov cr.ifa=in1
-   ;;
-   itr.d dtr[r24]=loc3     // wire in new mapping...
-   ;;
-   // re-pin mappings for shared_arch_info
-
-   mov r24=IA64_TR_ARCH_INFO
-   or loc4 = r25,loc4          // construct PA | page properties
-   mov r23 = PAGE_SHIFT<<2
-   ;;
-   ptr.d   in2,r23
-   ;;
-   mov cr.itir=r23
-   mov cr.ifa=in2
-   ;;
-   itr.d dtr[r24]=loc4     // wire in new mapping...
-   ;;
-#endif
-
-
    // re-pin mappings for guest_vhpt
 
    mov r24=IA64_TR_PERVP_VHPT
index 7c915b53d9dac85a716a8ecc5e6acb65f91bf5a8..bf4b8f573d016f8bbcf987da39e6fdc3605100c2 100644 (file)
@@ -88,6 +88,7 @@ extern struct vcpu *ia64_switch_to (struct vcpu *next_task);
 /* Address of vpsr.i (in fact evtchn_upcall_mask) of current vcpu.
    This is a Xen virtual address.  */
 DEFINE_PER_CPU(uint8_t *, current_psr_i_addr);
+DEFINE_PER_CPU(int *, current_psr_ic_addr);
 
 #include <xen/sched-if.h>
 
@@ -106,6 +107,8 @@ void schedule_tail(struct vcpu *prev)
                vcpu_load_kernel_regs(current);
                __ia64_per_cpu_var(current_psr_i_addr) = &current->domain->
                  shared_info->vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+               __ia64_per_cpu_var(current_psr_ic_addr) = (int *)
+                 (current->domain->arch.shared_info_va + XSI_PSR_IC_OFS);
        }
 }
 
@@ -159,6 +162,8 @@ if (!i--) { i = 1000000; printk("+"); }
                        vcpu_pend_timer(current);
                __ia64_per_cpu_var(current_psr_i_addr) = &nd->shared_info->
                  vcpu_info[current->vcpu_id].evtchn_upcall_mask;
+               __ia64_per_cpu_var(current_psr_ic_addr) =
+                 (int *)(nd->arch.shared_info_va + XSI_PSR_IC_OFS);
        } else {
                /* When switching to idle domain, only need to disable vhpt
                 * walker. Then all accesses happen within idle context will
@@ -167,6 +172,7 @@ if (!i--) { i = 1000000; printk("+"); }
                pta = ia64_get_pta();
                ia64_set_pta(pta & ~VHPT_ENABLED);
                __ia64_per_cpu_var(current_psr_i_addr) = NULL;
+               __ia64_per_cpu_var(current_psr_ic_addr) = NULL;
         }
     }
     local_irq_restore(spsr);
@@ -304,7 +310,7 @@ static void init_switch_stack(struct vcpu *v)
 int arch_domain_create(struct domain *d)
 {
        // the following will eventually need to be negotiated dynamically
-       d->arch.shared_info_va = SHAREDINFO_ADDR;
+       d->arch.shared_info_va = DEFAULT_SHAREDINFO_ADDR;
        d->arch.breakimm = 0x1000;
 
        if (is_idle_domain(d))
@@ -514,6 +520,41 @@ void build_physmap_table(struct domain *d)
        d->arch.physmap_built = 1;
 }
 
+unsigned long
+domain_set_shared_info_va (unsigned long va)
+{
+       struct vcpu *v = current;
+       struct domain *d = v->domain;
+       struct vcpu *v1;
+
+       /* Check virtual address:
+          must belong to region 7,
+          must be 64Kb aligned,
+          must not be within Xen virtual space.  */
+       if ((va >> 61) != 7
+           || (va & 0xffffUL) != 0
+           || (va >= HYPERVISOR_VIRT_START && va < HYPERVISOR_VIRT_END))
+               panic_domain (NULL, "%s: bad va (0x%016lx)\n", __func__, va);
+
+       /* Note: this doesn't work well if other cpus are already running.
+          However this is part of the spec :-)  */
+       printf ("Domain set shared_info_va to 0x%016lx\n", va);
+       d->arch.shared_info_va = va;
+
+       for_each_vcpu (d, v1) {
+               VCPU(v1, interrupt_mask_addr) = 
+                       (unsigned char *)va + INT_ENABLE_OFFSET(v1);
+       }
+
+       __ia64_per_cpu_var(current_psr_ic_addr) = (int *)(va + XSI_PSR_IC_OFS);
+
+       /* Remap the shared pages.  */
+       set_one_rr (7UL << 61, PSCB(v,rrs[7]));
+
+       return 0;
+}
+
+
 // remove following line if not privifying in memory
 //#define HAVE_PRIVIFY_MEMORY
 #ifndef HAVE_PRIVIFY_MEMORY
index eb4e6091c8ebf797f12d26fe858f894adfd0c37d..5c2981dbab6f8081aab60031a298e619c171f863 100644 (file)
@@ -118,7 +118,7 @@ void reflect_interruption(unsigned long isr, struct pt_regs *regs, unsigned long
 
        regs->cr_iip = ((unsigned long) PSCBX(v,iva) + vector) & ~0xffUL;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
-       regs->r31 = XSI_IPSR;
+       regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
 
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v,interrupt_collection_enabled) = 0;
@@ -172,7 +172,7 @@ void reflect_event(struct pt_regs *regs)
 
        regs->cr_iip = v->arch.event_callback_ip;
        regs->cr_ipsr = (regs->cr_ipsr & ~DELIVER_PSR_CLR) | DELIVER_PSR_SET;
-       regs->r31 = XSI_IPSR;
+       regs->r31 = current->domain->arch.shared_info_va + XSI_IPSR_OFS;
 
        v->vcpu_info->evtchn_upcall_mask = 1;
        PSCB(v,interrupt_collection_enabled) = 0;
index e02783503e35049608ca828482412b197be578de..50e8331e0775ac66e551d5c2a755ca58859f235a 100644 (file)
@@ -267,6 +267,9 @@ fw_hypercall (struct pt_regs *regs)
            case FW_HYPERCALL_IPI:
                fw_hypercall_ipi (regs);
                break;
+           case FW_HYPERCALL_SET_SHARED_INFO_VA:
+               regs->r8 = domain_set_shared_info_va (regs->r28);
+               break;
            case FW_HYPERCALL_FPSWA:
                fpswa_ret = fw_hypercall_fpswa (v);
                regs->r8  = fpswa_ret.status;
index 9544d630e53531b8dc3fa4bd2d6cc272f30ec5a4..443fde9249168b123becabbff8365b95f134f96f 100644 (file)
@@ -304,9 +304,13 @@ ENTRY(hyper_ssm_i)
        add r24=r24,r23;;
        mov cr.iip=r24;;
        // OK, now all set to go except for switch to virtual bank0
-       mov r30=r2; mov r29=r3;;
+       mov r30=r2
+       mov r29=r3
+       mov r28=r4
+       ;;
        adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
        adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+       adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
        bsw.1;;
        // FIXME?: ar.unat is not really handled correctly,
        // but may not matter if the OS is NaT-clean
@@ -326,9 +330,11 @@ ENTRY(hyper_ssm_i)
        .mem.offset 8,0; st8.spill [r3]=r29,16 ;;
        .mem.offset 0,0; st8.spill [r2]=r30,16;
        .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
-       movl r31=XSI_IPSR;;
+       mov r31=r4
        bsw.0 ;;
-       mov r2=r30; mov r3=r29;;
+       mov r2=r30
+       mov r3=r29
+       mov r4=r28
        adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
        st4 [r20]=r0 ;;
        mov pr=r31,-1 ;;
@@ -372,7 +378,10 @@ GLOBAL_ENTRY(fast_tick_reflect)
        st8 [r20]=r21;;
 #endif
        // vcpu_pend_timer(current)
-       movl r18=XSI_PSR_IC;;
+       movl r18=THIS_CPU(current_psr_ic_addr)
+       ;;
+       ld8 r18=[r18]
+       ;;
        adds r20=XSI_ITV_OFS-XSI_PSR_IC_OFS,r18 ;;
        ld8 r20=[r20];;
        cmp.eq p6,p0=r20,r0     // if cr.itv==0 done
@@ -481,12 +490,17 @@ GLOBAL_ENTRY(fast_tick_reflect)
        add r24=r24,r23;;
        mov cr.iip=r24;;
        // OK, now all set to go except for switch to virtual bank0
-       mov r30=r2; mov r29=r3;;
+       mov r30=r2
+       mov r29=r3
+       mov r27=r4
 #ifdef HANDLE_AR_UNAT
        mov r28=ar.unat;
 #endif
-       adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
-       adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+       ;;
+       adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18
+       adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
+       adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
+       ;;
        bsw.1;;
        .mem.offset 0,0; st8.spill [r2]=r16,16;
        .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
@@ -506,28 +520,32 @@ GLOBAL_ENTRY(fast_tick_reflect)
        .mem.offset 8,0; st8.spill [r3]=r31,16 ;;
 #ifdef HANDLE_AR_UNAT
        // r16~r23 are preserved regsin bank0 regs, we need to restore them,
-    // r24~r31 are scratch regs, we don't need to handle NaT bit,
-    // because OS handler must assign it before access it
-    ld8 r16=[r2],16;
-    ld8 r17=[r3],16;;
-    ld8 r18=[r2],16;
-    ld8 r19=[r3],16;;
-    ld8 r20=[r2],16;
-    ld8 r21=[r3],16;;
-    ld8 r22=[r2],16;
-    ld8 r23=[r3],16;;
+       // r24~r31 are scratch regs, we don't need to handle NaT bit,
+       // because OS handler must assign it before access it
+       ld8 r16=[r2],16;
+       ld8 r17=[r3],16;;
+       ld8 r18=[r2],16;
+       ld8 r19=[r3],16;;
+       ld8 r20=[r2],16;
+       ld8 r21=[r3],16;;
+       ld8 r22=[r2],16;
+       ld8 r23=[r3],16;;
 #endif
-    movl r31=XSI_IPSR;;
-    bsw.0 ;;
-    mov r24=ar.unat;
-    mov r2=r30; mov r3=r29;;
+       mov r31=r4
+       ;;
+       bsw.0 ;;
+       mov r24=ar.unat;
+       mov r2=r30
+       mov r3=r29
+       mov r4=r27
 #ifdef HANDLE_AR_UNAT
-    mov ar.unat=r28;
+       mov ar.unat=r28;
 #endif
-    adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
-    adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
-    st8 [r25]=r24;
-    st4 [r20]=r0 ;;
+       ;;
+       adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
+       adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
+       st8 [r25]=r24;
+       st4 [r20]=r0 ;;
 fast_tick_reflect_done:
        mov pr=r31,-1 ;;
        rfi
@@ -659,12 +677,16 @@ ENTRY(fast_reflect)
        add r20=r20,r23;;
        mov cr.iip=r20;;
        // OK, now all set to go except for switch to virtual bank0
-       mov r30=r2; mov r29=r3;;
+       mov r30=r2
+       mov r29=r3
 #ifdef HANDLE_AR_UNAT
        mov r28=ar.unat;
 #endif
+       mov r27=r4
        adds r2=XSI_BANK1_R16_OFS-XSI_PSR_IC_OFS,r18;
-       adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18;;
+       adds r3=(XSI_BANK1_R16_OFS+8)-XSI_PSR_IC_OFS,r18
+       adds r4=XSI_IPSR_OFS-XSI_PSR_IC_OFS,r18
+       ;;
        bsw.1;;
        .mem.offset 0,0; st8.spill [r2]=r16,16;
        .mem.offset 8,0; st8.spill [r3]=r17,16 ;;
@@ -687,24 +709,28 @@ ENTRY(fast_reflect)
     // r24~r31 are scratch regs, we don't need to handle NaT bit,
     // because OS handler must assign it before access it
        ld8 r16=[r2],16;
-    ld8 r17=[r3],16;;
-    ld8 r18=[r2],16;
-    ld8 r19=[r3],16;;
+       ld8 r17=[r3],16;;
+       ld8 r18=[r2],16;
+       ld8 r19=[r3],16;;
        ld8 r20=[r2],16;
-    ld8 r21=[r3],16;;
-    ld8 r22=[r2],16;
-    ld8 r23=[r3],16;;
+       ld8 r21=[r3],16;;
+       ld8 r22=[r2],16;
+       ld8 r23=[r3],16;;
 #endif
-       movl r31=XSI_IPSR;;
+       mov r31=r4
+       ;;
        bsw.0 ;;
-    mov r24=ar.unat;
-       mov r2=r30; mov r3=r29;;
+       mov r24=ar.unat;
+       mov r2=r30
+       mov r3=r29
 #ifdef HANDLE_AR_UNAT
        mov ar.unat=r28;
 #endif
-    adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
+       mov r4=r27
+       ;;
+       adds r25=XSI_B1NATS_OFS-XSI_PSR_IC_OFS,r18 ;
        adds r20=XSI_BANKNUM_OFS-XSI_PSR_IC_OFS,r18 ;;
-    st8 [r25]=r24;
+       st8 [r25]=r24;
        st4 [r20]=r0 ;;
        mov pr=r31,-1 ;;
        rfi
@@ -732,7 +758,8 @@ GLOBAL_ENTRY(fast_access_reflect)
        extr.u r21=r30,IA64_PSR_CPL0_BIT,2 ;;
        cmp.eq p7,p0=r21,r0
 (p7)   br.spnt.few dispatch_reflection ;;
-       movl r18=XSI_PSR_IC;;
+       movl r18=THIS_CPU(current_psr_ic_addr);;
+       ld8 r18=[r18];;
        ld4 r21=[r18];;
        cmp.eq p7,p0=r0,r21
 (p7)   br.spnt.few dispatch_reflection ;;
@@ -1043,8 +1070,8 @@ ENTRY(hyper_rfi)
        // validate vcr.iip, if in Xen range, do it the slow way
        adds r20=XSI_IIP_OFS-XSI_PSR_IC_OFS,r18 ;;
        ld8 r22=[r20];;
-       movl r23=XEN_VIRT_SPACE_LOW
-       movl r24=XEN_VIRT_SPACE_HIGH ;;
+       movl r23=HYPERVISOR_VIRT_START
+       movl r24=HYPERVISOR_VIRT_END;;
        cmp.ltu p0,p7=r22,r23 ;;        // if !(iip<low) &&
 (p7)   cmp.geu p0,p7=r22,r24 ;;        //    !(iip>=high)
 (p7)   br.spnt.few dispatch_break_fault ;;
index 338abdf8345b64af3976784ac3602d9ec94ba917..e358762c06795f1019f80ce89ce5f6c72c672e37 100644 (file)
@@ -508,10 +508,9 @@ late_alt_dtlb_miss:
        movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
        mov r21=cr.ipsr
        ;;
-#else
 #endif
 #ifdef CONFIG_DISABLE_VHPT
-       shr.u r22=r16,61                        // get the region number into r21
+       shr.u r22=r16,61                        // get the region into r22
        ;;
        cmp.gt p8,p0=6,r22                      // access to region 0-5
        ;;
@@ -992,7 +991,9 @@ ENTRY(break_fault)
         cmp.eq p7,p0=r17,r18 ;; 
 (p7)    br.spnt.few dispatch_break_fault ;;
 #endif
-       movl r18=XSI_PSR_IC
+       movl r18=THIS_CPU(current_psr_ic_addr)
+       ;;
+       ld8 r18=[r18]
        ;;
        ld4 r19=[r18]
        ;;
index b92721223faf18309113b03d2330001f92840119..1581632540c5f67a995161e024b862979fb2a5c8 100644 (file)
@@ -1354,7 +1354,7 @@ check_xen_space_overlap (const char *func, u64 base, u64 page_size)
        base &= ~(page_size - 1);
 
        /* FIXME: ideally an MCA should be generated...  */
-       if (range_overlap (XEN_VIRT_SPACE_LOW, XEN_VIRT_SPACE_HIGH,
+       if (range_overlap (HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END,
                           base, base + page_size))
                panic_domain (NULL, "%s on Xen virtual space (%lx)\n",
                              func, base);
index ebaad08343a35869796b46e03ac7cbb0945c0d9e..4b12c706ae5292cb21da3d4fc616438d57fe3934 100644 (file)
@@ -10,7 +10,8 @@
 #include <asm/processor.h>
 #include <asm/pgtable.h>
 #include <asm/vhpt.h>
-
+#include <public/arch-ia64.h>
+       
 // Change rr7 to the passed value while ensuring
 // Xen is mapped into the new region.
 #define PSR_BITS_TO_CLEAR                                              \
@@ -140,8 +141,8 @@ GLOBAL_ENTRY(ia64_new_rr7)
        ;;
        itr.d dtr[r21]=r23              // wire in new mapping...
        
-       // Map for arch_vcpu_info_t
-       movl r22=XSI_OFS
+       // Map mapped_regs
+       mov r22=XMAPPEDREGS_OFS
        mov r24=PAGE_SHIFT<<2
        ;; 
        add r22=r22,in3
@@ -150,7 +151,7 @@ GLOBAL_ENTRY(ia64_new_rr7)
        or r23=loc7,r25                 // construct PA | page properties
        mov cr.itir=r24
        mov cr.ifa=r22
-       mov r21=IA64_TR_ARCH_INFO
+       mov r21=IA64_TR_MAPPED_REGS
        ;;
        itr.d dtr[r21]=r23              // wire in new mapping...
 
@@ -239,19 +240,24 @@ GLOBAL_ENTRY(__get_domain_bundle)
 END(__get_domain_bundle)
 
 GLOBAL_ENTRY(dorfirfi)
-        movl r16 = XSI_IIP
-        movl r17 = XSI_IPSR
-        movl r18 = XSI_IFS
+       // Read current vcpu shared info
+       movl r16=THIS_CPU(current_psr_ic_addr)
+       ;;
+       ld8 r19 = [r16]
+       ;;
+       add r16 = XSI_IIP_OFS - XSI_PSR_IC_OFS, r19
+       add r17 = XSI_IPSR_OFS - XSI_PSR_IC_OFS, r19
+       add r18 = XSI_IFS_OFS - XSI_PSR_IC_OFS, r19
        ;;
        ld8 r16 = [r16]
        ld8 r17 = [r17]
        ld8 r18 = [r18]
        ;;
-        mov cr.iip=r16
-        mov cr.ipsr=r17
-        mov cr.ifs=r18
+       mov cr.iip=r16
+       mov cr.ipsr=r17
+       mov cr.ifs=r18
        ;;
-        rfi
+       rfi
        ;;
 END(dorfirfi)
 
index 488b9b52b7cf10c82c1d756e734e2f7ec2ce5109..ea941ab8ac10d86f33106b5f8f6a3fbca4f25f24 100644 (file)
 #define FW_HYPERCALL_FPSWA_PATCH_PADDR                 FW_HYPERCALL_PADDR(FW_HYPERCALL_FPSWA_PATCH_INDEX)
 #define FW_HYPERCALL_FPSWA                             0x500UL
 
+/* Set the shared_info base virtual address.  */
+#define FW_HYPERCALL_SET_SHARED_INFO_VA                        0x600UL
+
 /* Hypercalls index bellow _FIRST_ARCH are reserved by Xen, while those above
    are for the architecture.
    Note: this limit was defined by Xen/ia64 (and not by Xen).²
index ceb5e890e9ba3863f54713d584d7dfa40c6a6142..d244e055e802202ce460e24309bef42cc5296e73 100644 (file)
@@ -27,7 +27,7 @@ p2m_entry_set(struct p2m_entry* entry, volatile pte_t* pte, pte_t used)
 static inline int
 p2m_entry_retry(struct p2m_entry* entry)
 {
-    //XXX see lookup_domian_pte().
+    //XXX see lookup_domain_pte().
     //    NULL is set for invalid gpaddr for the time being.
     if (entry->pte == NULL)
         return 0;
@@ -41,6 +41,9 @@ extern void domain_relinquish_resources(struct domain *);
 extern unsigned long translate_domain_mpaddr(unsigned long mpaddr,
                                              struct p2m_entry* entry);
 
+/* Set shared_info virtual address.  */
+extern unsigned long domain_set_shared_info_va (unsigned long va);
+
 /* Flush cache of domain d.
    If sync_only is true, only synchronize I&D caches,
    if false, flush and invalidate caches.  */
index 5a6c6efa9978e8a7a8b87a2478e965837231ffeb..dcfaf65d6ba337ce3c1ba0c25fefeae6f358cb3d 100644 (file)
@@ -6,7 +6,7 @@
  */
 #define IA64_TR_SHARED_INFO    3       /* dtr3: page shared with domain */
 #define        IA64_TR_VHPT            4       /* dtr4: vhpt */
-#define IA64_TR_ARCH_INFO      5
+#define IA64_TR_MAPPED_REGS    5       /* dtr5: vcpu mapped regs */
 #define IA64_TR_PERVP_VHPT     6
 #define IA64_DTR_GUEST_KERNEL   7
 #define IA64_ITR_GUEST_KERNEL   2
index 4aa45006527b846b98cbc617849b114e21485f5c..a385075a9ac33078693b0c04099557a324e205b2 100644 (file)
 /* Define HV space hierarchy.
    VMM memory space is protected by CPL for paravirtualized domains and
    by VA for VTi domains.  VTi imposes VA bit 60 != VA bit 59 for VMM.  */
-#define XEN_VIRT_SPACE_LOW      0xe800000000000000
-#define XEN_VIRT_SPACE_HIGH     0xf800000000000000     
 
-#define __IA64_UNCACHED_OFFSET  0xe800000000000000UL
-
-#define XEN_START_ADDR          0xf000000000000000
-#define HYPERVISOR_VIRT_START   0xf000000000000000
+#define HYPERVISOR_VIRT_START   0xe800000000000000
 #define KERNEL_START            0xf000000004000000
-#define SHAREDINFO_ADDR                 0xf100000000000000
-#define XSI_OFS                 PAGE_SIZE
-#define SHARED_ARCHINFO_ADDR    (SHAREDINFO_ADDR + XSI_OFS)
-#define PERCPU_ADDR             (SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
+#define DEFAULT_SHAREDINFO_ADDR         0xf100000000000000
+#define PERCPU_ADDR             (DEFAULT_SHAREDINFO_ADDR - PERCPU_PAGE_SIZE)
 #define VHPT_ADDR               0xf200000000000000
 #ifdef CONFIG_VIRTUAL_FRAME_TABLE
 #define VIRT_FRAME_TABLE_ADDR   0xf300000000000000
 #define VIRT_FRAME_TABLE_END    0xf400000000000000
 #endif
-#define XEN_END_ADDR            0xf400000000000000
+#define HYPERVISOR_VIRT_END     0xf800000000000000
 
-#define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)
+#define PAGE_OFFSET             __IA64_UL_CONST(0xf000000000000000)
+#define __IA64_UNCACHED_OFFSET  0xe800000000000000UL
 
 #define IS_VMM_ADDRESS(addr) ((((addr) >> 60) ^ ((addr) >> 59)) & 1)
 
index bdd592241344514dcaf9d50f9c914a2fbe004c25..90f027a773ded54073f5501799592d8c6b954ed0 100644 (file)
@@ -380,13 +380,17 @@ DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
 
 #endif /* !__ASSEMBLY__ */
 
-/* Address of shared_info in domain virtual space.  */
-#define XSI_BASE       0xf100000000000000
+/* Address of shared_info in domain virtual space.
+   This is the default address, for compatibility only.  */
+#define XSI_BASE                               0xf100000000000000
+
 /* Size of the shared_info area (this is not related to page size).  */
-#define XSI_LOG_SIZE   14
-#define XSI_SIZE       (1 << XSI_LOG_SIZE)
+#define XSI_LOG_SIZE                   14
+#define XSI_SIZE                               (1 << XSI_LOG_SIZE)
 /* Log size of mapped_regs area (64 KB - only 4KB is used).  */
-#define XASI_LOG_SIZE  16
+#define XMAPPEDREGS_LOG_SIZE   16
+/* Offset of XASI (Xen arch shared info) wrt XSI_BASE.  */
+#define XMAPPEDREGS_OFS                        XSI_SIZE
 
 /* Hyperprivops.  */
 #define HYPERPRIVOP_RFI                        0x1